3ddb79b7UG2QiRAU-Wvc1Y_BLigu1Q xenolinux-2.4.16-sparse/arch/xeno/drivers/console/console.c
3ddb79b75eo4PRXkT6Th9popt_SJhg xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/Makefile
3ddb79b7Xyaoep6U0kLvx6Kx7OauDw xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_core.c
+3df9ce13K7qSLBtHV-01QHPW62649Q xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_memory.c
3ddb79b7PulSkF9m3c7K5MkxHRf4hA xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_ops.h
+3df9ce13tITy-OuYx_zQemsvqqLTWA xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/hypervisor_defs.h
3ddba759XOjcl_OF-52dOYq7sgMykQ xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/vfr.c
3ddb79b7s7yYBioHidSkIoHtQxYmOw xenolinux-2.4.16-sparse/arch/xeno/drivers/network/Makefile
3ddb79b7CpLL98ScdpbKkVBktlbCtQ xenolinux-2.4.16-sparse/arch/xeno/drivers/network/network.c
3ddb79b8qdD_svLCCAja_oP2w4Tn8Q xenolinux-2.4.16-sparse/arch/xeno/mm/Makefile
3ddb79b8ukY8dsPYmR8eNk-aCzFPsQ xenolinux-2.4.16-sparse/arch/xeno/mm/extable.c
3ddb79b856Zta9b3s0bgUCGbG1blvQ xenolinux-2.4.16-sparse/arch/xeno/mm/fault.c
+3df9ce13dZ6UGDjZbUeZfyH4Hy6aCA xenolinux-2.4.16-sparse/arch/xeno/mm/get_unmapped_area.c
3ddb79b85fpsKT8A9WYnuJg03b715g xenolinux-2.4.16-sparse/arch/xeno/mm/hypervisor.c
3ddb79b83Zj7Xn2QVhU4HeMuAC9FjA xenolinux-2.4.16-sparse/arch/xeno/mm/init.c
+3df9ce13TRWIv0Mawm15zESP7jcT7A xenolinux-2.4.16-sparse/arch/xeno/mm/mmu_context.c
3ddb79b7aKdTkbr3u6aze8tVwGh_TQ xenolinux-2.4.16-sparse/arch/xeno/vmlinux.lds
3ddb79bbx682YH6vR2zbVOXwg73ULg xenolinux-2.4.16-sparse/drivers/block/ll_rw_blk.c
3ddb79bcJfHdwrPsjqgI33_OsGdVCg xenolinux-2.4.16-sparse/drivers/block/rd.c
kaf24@labyrinth.cl.cam.ac.uk
kaf24@plym.cl.cam.ac.uk
kaf24@striker.cl.cam.ac.uk
+lynx@idefix.cl.cam.ac.uk
smh22@boulderdash.cl.cam.ac.uk
+
/******************************************************************************
* dom0_ops.c
*
#include <xeno/sched.h>
#include <xeno/event.h>
+extern unsigned int alloc_new_dom_mem(struct task_struct *, unsigned int);
static unsigned int get_domnr(void)
{
switch ( op.cmd )
{
+ case DOM0_STARTDOM:
+ {
+ struct task_struct * p = find_domain_by_id(op.u.meminfo.domain);
+ ret = final_setup_guestos(p, &op.u.meminfo);
+ if( ret != 0 ){
+ p->state = TASK_DYING;
+ release_task(p);
+ break;
+ }
+ wake_up(p);
+ reschedule(p);
+ ret = p->domain;
+ }
+ break;
+
case DOM0_NEWDOMAIN:
{
struct task_struct *p;
p->domain = dom;
pro = (pro+1) % smp_num_cpus;
p->processor = pro;
+
+        /* If we are not booting domain 0, then only memory
+         * needs to be allocated.
+         */
+ if(dom != 0){
+ if(alloc_new_dom_mem(p, op.u.newdomain.memory_kb) != 0){
+ ret = -1;
+ break;
+ }
+ ret = p->domain;
+ break;
+ }
+
+ /* executed only in case of domain 0 */
ret = setup_guestos(p, &op.u.newdomain); /* Load guest OS into @p */
if ( ret != 0 )
{
}
break;
+ case DOM0_MAPTASK:
+ {
+ unsigned int dom = op.u.mapdomts.domain;
+
+ op.u.mapdomts.ts_phy_addr = __pa(find_domain_by_id(dom));
+ copy_to_user(u_dom0_op, &op, sizeof(op));
+
+ }
+ break;
+
default:
ret = -ENOSYS;
+
#include <xeno/config.h>
#include <xeno/init.h>
#include <xeno/lib.h>
#include <xeno/dom0_ops.h>
#include <asm/io.h>
+#include <asm/msr.h>
+#include <xeno/multiboot.h>
+
+#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)
+#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY)
+
+extern int nr_mods;
+extern module_t *mod;
+extern unsigned char *cmdline;
+
rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;
schedule_data_t schedule_data[NR_CPUS];
}
-static unsigned int alloc_new_dom_mem(struct task_struct *p, unsigned int kbytes)
+unsigned int alloc_new_dom_mem(struct task_struct *p, unsigned int kbytes)
{
struct list_head *temp;
return 0;
}
+/* Final stage of building a guest domain: the builder running in dom0 has
+ * already loaded the image and constructed initial page tables; this routine
+ * splices in the hypervisor mappings, maps and fills shared_info, populates
+ * start_info inside the new domain's address space, creates the virtual
+ * network/block interfaces and primes the first thread.
+ * Returns 0 on success. NOTE(review): meminfo comes from dom0 userspace via
+ * the dom0_op hypercall and is not validated here - confirm callers sanitise
+ * the addresses it contains.
+ */
+int final_setup_guestos(struct task_struct * p, dom_meminfo_t * meminfo)
+{
+    l2_pgentry_t * l2tab;
+    l1_pgentry_t * l1tab;
+    start_info_t * virt_startinfo_addr;
+    unsigned long virt_stack_addr;
+    unsigned long long time;
+    net_ring_t *net_ring;
+    char *dst; // temporary
+    int i; // temporary
+
+    /* entries 0xe0000000 onwards in page table must contain hypervisor
+     * mem mappings - copy them across from the idle page table.
+     */
+    l2tab = (l2_pgentry_t *)__va(meminfo->l2_pgt_addr);
+    memcpy(l2tab + DOMAIN_ENTRIES_PER_L2_PAGETABLE,
+        ((l2_pgentry_t *)idle0_pg_table) + DOMAIN_ENTRIES_PER_L2_PAGETABLE,
+        (ENTRIES_PER_L2_PAGETABLE - DOMAIN_ENTRIES_PER_L2_PAGETABLE) * sizeof(l2_pgentry_t));
+    p->mm.pagetable = mk_pagetable((unsigned long)l2tab);
+
+    /* map in the shared info structure */
+    l2tab = pagetable_ptr(p->mm.pagetable) + l2_table_offset(meminfo->virt_shinfo_addr);
+    l1tab = l2_pgentry_to_l1(*l2tab) + l1_table_offset(meminfo->virt_shinfo_addr);
+    *l1tab = mk_l1_pgentry(__pa(p->shared_info) | L1_PROT);
+
+    /* set up the shared info structure */
+    rdtscll(time);
+    p->shared_info->wall_time = time;
+    p->shared_info->domain_time = time;
+    p->shared_info->ticks_per_ms = ticks_per_usec * 1000;
+
+    /* we pass start info struct to guest os as function parameter on stack */
+    virt_startinfo_addr = (start_info_t *)meminfo->virt_startinfo_addr;
+    virt_stack_addr = (unsigned long)virt_startinfo_addr;
+
+    /* we need to populate start_info struct within the context of the
+     * new domain. thus, temporarily install its pagetables.
+     */
+    __cli();
+    __asm__ __volatile__ (
+        "mov %%eax, %%cr3"
+        : : "a" (__pa(pagetable_ptr(p->mm.pagetable))));
+
+    /* BUG FIX: was sizeof(virt_startinfo_addr), which clears only the size
+     * of a pointer (4 bytes); clear the whole start_info structure. */
+    memset(virt_startinfo_addr, 0, sizeof(*virt_startinfo_addr));
+    virt_startinfo_addr->nr_pages = p->tot_pages;
+    virt_startinfo_addr->shared_info = (shared_info_t *)meminfo->virt_shinfo_addr;
+    virt_startinfo_addr->pt_base = meminfo->virt_load_addr +
+                    ((p->tot_pages - 1) << PAGE_SHIFT);
+
+    /* now, this is just temporary before we switch to pseudo phys
+     * addressing. this works only for contiguous chunks of memory!!!
+     */
+    virt_startinfo_addr->phys_base = p->pg_head << PAGE_SHIFT;
+
+    /* Add virtual network interfaces and point to them in startinfo. */
+    while (meminfo->num_vifs-- > 0) {
+        net_ring = create_net_vif(p->domain);
+        if (!net_ring) panic("no network ring!\n");
+    }
+    virt_startinfo_addr->net_rings = p->net_ring_base;
+    virt_startinfo_addr->num_net_rings = p->num_net_vifs;
+
+    /* Add block io interface */
+    virt_startinfo_addr->blk_ring = p->blk_ring_base;
+
+    /* i do not think this has to be done any more, temporary */
+    /* We tell OS about any modules we were given. */
+    if ( nr_mods > 1 )
+    {
+        virt_startinfo_addr->mod_start =
+            (mod[1].mod_start-mod[0].mod_start-12) + meminfo->virt_load_addr;
+        virt_startinfo_addr->mod_len =
+            mod[nr_mods-1].mod_end - mod[1].mod_start;
+    }
+
+    /* temporary, meminfo->cmd_line just needs to be copied into start info */
+    dst = virt_startinfo_addr->cmd_line;
+    if ( mod[0].string )
+    {
+        char *modline = (char *)__va(mod[0].string);
+        for ( i = 0; i < 255; i++ )
+        {
+            if ( modline[i] == '\0' ) break;
+            *dst++ = modline[i];
+        }
+    }
+    *dst = '\0';
+
+    if ( opt_nfsroot )
+    {
+        unsigned char boot[150];
+        unsigned char ipbase[20], nfsserv[20], gateway[20], netmask[20];
+        unsigned char nfsroot[70];
+        /* NOTE(review): opt_nfsroot is used as a printf format string; it is
+         * boot-time configuration, but confirm it cannot be attacker-set. */
+        snprintf(nfsroot, sizeof(nfsroot), opt_nfsroot, p->domain);
+        /* BUG FIX: the bound passed to snprintf was 200 although boot[] is
+         * only 150 bytes; use sizeof(boot) so the write can never overflow. */
+        snprintf(boot, sizeof(boot),
+                " root=/dev/nfs ip=%s:%s:%s:%s::eth0:off nfsroot=%s",
+                quad_to_str(opt_ipbase + p->domain, ipbase),
+                quad_to_str(opt_nfsserv, nfsserv),
+                quad_to_str(opt_gateway, gateway),
+                quad_to_str(opt_netmask, netmask),
+                nfsroot);
+        strcpy(dst, boot);
+    }
+
+    /* Reinstate the caller's page tables. */
+    __asm__ __volatile__ (
+        "mov %%eax,%%cr3"
+        : : "a" (__pa(pagetable_ptr(current->mm.pagetable))));
+    __sti();
+
+    /* First execution context: entry point, stack, and start_info pointer
+     * passed as the initial argument. */
+    new_thread(p,
+               (unsigned long)meminfo->virt_load_addr,
+               (unsigned long)virt_stack_addr,
+               (unsigned long)virt_startinfo_addr);
+
+    return 0;
+}
+
/*
* Initial load map:
* start_address:
* <one page>
*/
#define MB_PER_DOMAIN 16
-#include <asm/msr.h>
-#include <xeno/multiboot.h>
-extern int nr_mods;
-extern module_t *mod;
-extern unsigned char *cmdline;
int setup_guestos(struct task_struct *p, dom0_newdomain_t *params)
{
-#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)
-#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY)
#define ALLOC_PAGE_FROM_DOMAIN() \
({ alloc_address -= PAGE_SIZE; __va(alloc_address); })
char *src, *dst;
l1_pgentry_t *l1tab = NULL;
struct pfn_info *page = NULL;
net_ring_t *net_ring;
+ blk_ring_t *blk_ring;
if ( strncmp(__va(mod[0].mod_start), "XenoGues", 8) )
{
virt_startinfo_address->phys_base = start_address;
/* NB. Next field will be NULL if dom != 0. */
virt_startinfo_address->frame_table = virt_ftable_start_addr;
+ virt_startinfo_address->frame_table_len = ft_size;
+ virt_startinfo_address->frame_table_pa = __pa(frame_table);
/* Add virtual network interfaces and point to them in startinfo. */
while (params->num_vifs-- > 0) {
+
+
/******************************************************************************
* memory.c
*
struct pfn_info *page;
ASSERT(page_nr < max_page);
page = frame_table + page_nr;
- ASSERT((page->flags & PG_domain_mask) == current->domain);
- ASSERT((((page->flags & PG_type_mask) == PGT_writeable_page) &&
- (page_type_count(page) != 0)) ||
- (((page->flags & PG_type_mask) == PGT_none) &&
- (page_type_count(page) == 0)));
+
+ if(current->domain != 0){
+ ASSERT((page->flags & PG_domain_mask) == current->domain);
+ ASSERT((((page->flags & PG_type_mask) == PGT_writeable_page) &&
+ (page_type_count(page) != 0)) ||
+ (((page->flags & PG_type_mask) == PGT_none) &&
+ (page_type_count(page) == 0)));
+ }
+
ASSERT((!writeable) || (page_type_count(page) != 0));
if ( writeable && (put_page_type(page) == 0) )
page->flags &= ~PG_type_mask;
kill_domain_with_errmsg("Cannot read page update request");
}
- err = 1;
-
pfn = cur.ptr >> PAGE_SHIFT;
- if ( !pfn )
- {
- switch ( cur.ptr )
- {
- case PGREQ_ADD_BASEPTR:
- err = get_l2_table(cur.val >> PAGE_SHIFT);
- break;
- case PGREQ_REMOVE_BASEPTR:
- if ( cur.val == __pa(pagetable_ptr(current->mm.pagetable)) )
- {
- MEM_LOG("Attempt to remove current baseptr! %08lx",
- cur.val);
- }
- else
- {
- err = put_l2_table(cur.val >> PAGE_SHIFT);
- }
- break;
- default:
- MEM_LOG("Invalid page update command %08lx", cur.ptr);
- break;
- }
- }
- else if ( (cur.ptr & (sizeof(l1_pgentry_t)-1)) || (pfn >= max_page) )
+ if ( pfn >= max_page )
{
- MEM_LOG("Page out of range (%08lx>%08lx) or misalign %08lx",
- pfn, max_page, cur.ptr);
+ MEM_LOG("Page out of range (%08lx > %08lx)", pfn, max_page);
+ kill_domain_with_errmsg("Page update request out of range");
}
- else
+
+ err = 1;
+
+ /* Least significant bits of 'ptr' demux the operation type. */
+ switch ( cur.ptr & (sizeof(l1_pgentry_t)-1) )
{
+
+ /*
+ * PGREQ_NORMAL: Normal update to any level of page table.
+ */
+ case PGREQ_NORMAL:
page = frame_table + pfn;
flags = page->flags;
if ( (flags & PG_domain_mask) == current->domain )
mk_l2_pgentry(cur.val));
break;
default:
- /*
- * This might occur if a page-table update is
- * requested before we've inferred the type
- * of the containing page. It shouldn't happen
- * if page tables are built strictly top-down, so
- * we have a MEM_LOG warning message.
- */
- MEM_LOG("Unnecessary update to non-pt page %08lx",
- cur.ptr);
- *(unsigned long *)__va(cur.ptr) = cur.val;
- err = 0;
+ MEM_LOG("Update to non-pt page %08lx", cur.ptr);
break;
}
}
+ break;
+
+ /*
+ * PGREQ_ADD_BASEPTR: Announce a new top-level page table.
+ */
+ case PGREQ_ADD_BASEPTR:
+ err = get_l2_table(cur.val >> PAGE_SHIFT);
+ break;
+
+ /*
+ * PGREQ_REMOVE_BASEPTR: Destroy reference to a top-level page
+ * table.
+ */
+ case PGREQ_REMOVE_BASEPTR:
+ pfn = cur.val >> PAGE_SHIFT;
+ if ( pfn != (__pa(pagetable_ptr(current->mm.pagetable))
+ >> PAGE_SHIFT) )
+ {
+ err = put_l2_table(pfn);
+ }
+ else
+ {
+ MEM_LOG("Attempt to remove current baseptr! %08lx",
+ cur.val);
+ }
+ break;
+
+ /*
+ * PGREQ_UNCHECKED_UPDATE: Make an unchecked update to a
+ * bottom-level page-table entry.
+ * Restrictions apply:
+ * 1. Update only allowed by domain 0.
+ * 2. Update must be to a level-1 pte belonging to dom0.
+ */
+ case PGREQ_UNCHECKED_UPDATE:
+ cur.ptr &= ~(sizeof(l1_pgentry_t) - 1);
+ page = frame_table + pfn;
+ flags = page->flags;
+ if ( (flags | current->domain) == PGT_l1_page_table )
+ {
+ *(unsigned long *)__va(cur.ptr) = cur.val;
+ err = 0;
+ }
+ else
+ {
+ MEM_LOG("UNCHECKED_UPDATE: Bad domain %d, or"
+ " bad pte type %08lx", current->domain, flags);
+ }
+ break;
+
+ default:
+ MEM_LOG("Invalid page update command %08lx", cur.ptr);
+ break;
}
if ( err )
+
/******************************************************************************
* hypervisor-if.h
*
typedef struct
{
-#define PGREQ_ADD_BASEPTR 0
-#define PGREQ_REMOVE_BASEPTR 1
+/* PGREQ_XXX: specified in least-significant bits of 'ptr' field. */
+/* A normal page-table update request. */
+#define PGREQ_NORMAL 0
+/* Announce a new top-level page table. */
+#define PGREQ_ADD_BASEPTR 1
+/* Destroy an existing top-level page table. */
+#define PGREQ_REMOVE_BASEPTR 2
+/* Make an unchecked update to a base-level pte. */
+#define PGREQ_UNCHECKED_UPDATE 3
unsigned long ptr, val; /* *ptr = val */
} page_update_request_t;
int num_net_rings;
blk_ring_t *blk_ring; /* block io communication rings */
unsigned long frame_table; /* mapping of the frame_table for dom0 */
+ unsigned long frame_table_len;
+ unsigned long frame_table_pa; /* frame_table physical address */
unsigned char cmd_line[1]; /* variable-length */
} start_info_t;
+
/******************************************************************************
* dom0_ops.h
*
* Process command requests from domain-0 guest OS.
*
- * Copyright (c) 2002, K A Fraser
+ * Copyright (c) 2002, K A Fraser, B Dragovic
*/
#ifndef __DOM0_OPS_H__
#define DOM0_NEWDOMAIN 0
#define DOM0_KILLDOMAIN 1
+#define DOM0_MAPTASK 2
+#define DOM0_STARTDOM 4
+
+#define MAX_CMD_LEN 256
-typedef struct dom0_newdomain_st
+typedef struct dom0_newdomain_st
{
unsigned int memory_kb;
- unsigned int num_vifs;
+ unsigned int num_vifs; // temporary
+ unsigned int domain;
} dom0_newdomain_t;
typedef struct dom0_killdomain_st
unsigned int domain;
} dom0_killdomain_t;
+typedef struct dom0_map_ts
+{
+ unsigned int domain;
+ unsigned long ts_phy_addr;
+} dom0_tsmap_t;
+
+typedef struct domain_launch
+{
+ unsigned long domain;
+ unsigned long l2_pgt_addr;
+ unsigned long virt_load_addr;
+ unsigned long virt_shinfo_addr;
+ unsigned long virt_startinfo_addr;
+ unsigned int num_vifs;
+ char cmd_line[MAX_CMD_LEN];
+} dom_meminfo_t;
+
typedef struct dom0_op_st
{
unsigned long cmd;
{
dom0_newdomain_t newdomain;
dom0_killdomain_t killdomain;
+ dom0_tsmap_t mapdomts;
+ dom_meminfo_t meminfo;
}
u;
} dom0_op_t;
+
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H
#include <xeno/block.h>
struct task_struct {
+
int processor;
int state, hyp_events;
unsigned int domain;
+ /* index into frame_table threading pages belonging to this
+ * domain together. these are placed at the top of the structure
+ * to avoid nasty padding for various kernel structs when using
+ * task_struct in user space
+ */
+ unsigned long pg_head;
+ unsigned int tot_pages;
+
/* An unsafe pointer into a shared data area. */
shared_info_t *shared_info;
struct mm_struct *active_mm;
struct thread_struct thread;
struct task_struct *prev_task, *next_task;
-
- /* index into frame_table threading pages belonging to this
- * domain together
- */
- unsigned long pg_head;
- unsigned int tot_pages;
-
+
unsigned long flags;
};
extern struct task_struct *do_newdomain(void);
extern int setup_guestos(struct task_struct *p, dom0_newdomain_t *params);
+extern int final_setup_guestos(struct task_struct *p, dom_meminfo_t *);
struct task_struct *find_domain_by_id(unsigned int dom);
extern void release_task(struct task_struct *);
O_TARGET := dom0.o
-obj-y := dom0_core.o vfr.o
+obj-y := dom0_memory.o dom0_core.o vfr.o
include $(TOPDIR)/Rules.make
+
/******************************************************************************
* dom0_core.c
*
* Interface to privileged domain-0 commands.
*
- * Copyright (c) 2002, K A Fraser
+ * Copyright (c) 2002, K A Fraser, B Dragovic
*/
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/swap.h>
+#include <linux/smp_lock.h>
+#include <linux/swapctl.h>
+#include <linux/iobuf.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/tlb.h>
+
#include "dom0_ops.h"
+#include "hypervisor_defs.h"
+
+#define XENO_BASE "xeno" // proc file name defs should be in separate .h
+#define DOM0_CMD_INTF "dom0_cmd"
+#define DOM0_FT "frame_table"
+#define DOM0_NEWDOM "new_dom_id"
+
+#define MAX_LEN 16
+#define DOM_DIR "dom"
+#define DOM_TS "task_data"
+#define DOM_MEM "mem"
+
+static struct proc_dir_entry *xeno_base;
+static struct proc_dir_entry *dom0_cmd_intf;
+static struct proc_dir_entry *proc_ft;
-static struct proc_dir_entry *proc_dom0;
+unsigned long direct_mmap(unsigned long, unsigned long, pgprot_t, int, int);
+int direct_unmap(unsigned long, unsigned long);
+int direct_disc_unmap(unsigned long, unsigned long, int);
+
+/* frame_table mapped from dom0 */
+frame_table_t * frame_table;
+unsigned long frame_table_len;
+unsigned long frame_table_pa;
static unsigned char readbuf[1204];
-static int dom0_read_proc(char *page, char **start, off_t off,
+static int cmd_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
strcpy(page, readbuf);
return strlen(page);
}
+/* proc read handler for a domain's "task_data" file: asks the hypervisor for
+ * the physical address of the domain's task_struct, direct-maps one page of
+ * it into the calling process, and returns that user virtual address through
+ * the read buffer. The mapping is torn down by a later ts_write.
+ */
+static ssize_t ts_read(struct file * file, char * buff, size_t size, loff_t * off)
+{
+    dom0_op_t op;
+    unsigned long addr;
+    pgprot_t prot;
+    int ret = 0;
+
+    /* retrieve domain specific data from proc_dir_entry */
+    dom_procdata_t * dom_data = (dom_procdata_t *)((struct proc_dir_entry *)file->f_dentry->d_inode->u.generic_ip)->data;
+
+    /*
+     * get the phys addr of the task struct for the requested
+     * domain
+     */
+    op.cmd = DOM0_MAPTASK;
+    op.u.mapdomts.domain = dom_data->domain;
+    op.u.mapdomts.ts_phy_addr = -1;
+
+    ret = HYPERVISOR_dom0_op(&op);
+    if(ret != 0)
+        return -EAGAIN;
+
+    prot = PAGE_SHARED;
+
+    /* remap the range using xen specific routines.
+     * NOTE(review): direct_mmap's return value and the copy_to_user result
+     * are not checked; a mapping failure is handed to userspace as-is. */
+    addr = direct_mmap(op.u.mapdomts.ts_phy_addr, PAGE_SIZE, prot, 0, 0);
+    copy_to_user((unsigned long *)buff, &addr, sizeof(addr));
+    dom_data->map_size = PAGE_SIZE;
+
+    return sizeof(addr);
+
+}
+
+/* proc write handler for "task_data": userspace hands back the address it
+ * got from ts_read and the corresponding direct mapping is removed.
+ * Returns sizeof(addr) on success, -1 if the mapping was not found.
+ */
+static ssize_t ts_write(struct file * file, const char * buff, size_t size , loff_t * off)
+{
+    unsigned long addr;
+    /* per-domain data stashed on the proc entry at creation time */
+    dom_procdata_t * dom_data = (dom_procdata_t *)((struct proc_dir_entry *)file->f_dentry->d_inode->u.generic_ip)->data;
+
+    /* NOTE(review): copy_from_user's return value is ignored - confirm. */
+    copy_from_user(&addr, (unsigned long *)buff, sizeof(addr));
+
+    if(direct_unmap(addr, dom_data->map_size) == 0){
+        return sizeof(addr);
+    } else {
+        return -1;
+    }
+}
+
+/* File operations for the "task_data" proc entry: read maps, write unmaps. */
+struct file_operations ts_ops = {
+    read: ts_read,
+    write: ts_write,
+};
+
+/* Create /proc/xeno/dom<N>/ and its "task_data" file for domain <dom>.
+ * The per-domain data block is attached to both the directory and the file.
+ */
+static void create_proc_dom_entries(int dom)
+{
+    struct proc_dir_entry * dir;
+    struct proc_dir_entry * file;
+    dom_procdata_t * dom_data;
+    char dir_name[MAX_LEN];
+
+    snprintf(dir_name, MAX_LEN, "%s%d", DOM_DIR, dom);
+
+    /* BUG FIX: kmalloc takes (size, flags) - the original call had the
+     * arguments swapped - and the size should be that of the structure
+     * being allocated, not a magic 128. Allocation is now checked. */
+    dom_data = (dom_procdata_t *)kmalloc(sizeof(dom_procdata_t), GFP_KERNEL);
+    if(dom_data == NULL)
+        return;
+    dom_data->domain = dom;
+
+    dir = proc_mkdir(dir_name, xeno_base);
+    if(dir == NULL){
+        kfree(dom_data);
+        return;
+    }
+    dir->data = dom_data;
+
+    file = create_proc_entry(DOM_TS, 0600, dir);
+    if(file != NULL)
+    {
+        file->owner = THIS_MODULE;
+        file->nlink = 1;
+        file->proc_fops = &ts_ops;
+
+        file->data = dom_data;
+    }
+}
+
+/* mmap handler for a domain's "mem" proc file: maps the chain of domain
+ * pages starting at mem_data->pfn (threaded through the frame table) into
+ * the caller's vma. Returns 0 or -EAGAIN on mapping failure.
+ */
+static int dom_mem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+    proc_memdata_t * mem_data = (proc_memdata_t *)((struct proc_dir_entry *)file->f_dentry->d_inode->u.generic_ip)->data;
+
+    /*
+     * do not dump contents to core file,
+     * do not swap out.
+     */
+    vma->vm_flags |= VM_IO;
+    vma->vm_flags |= VM_RESERVED;
+
+    if(direct_remap_disc_page_range(vma->vm_start, mem_data->pfn, mem_data->tot_pages,
+        vma->vm_page_prot))
+        return -EAGAIN;
+
+    return 0;
+}
+
+/* proc write handler for "mem": userspace hands back the address returned by
+ * dom_mem_read and the discontiguous direct mapping is removed.
+ * Returns sizeof(addr) on success, -1 if the mapping was not found.
+ */
+static ssize_t dom_mem_write(struct file * file, const char * buff, size_t size ,
+    loff_t * off)
+{
+    unsigned long addr;
+    proc_memdata_t * mem_data = (proc_memdata_t *)((struct proc_dir_entry *)file->f_dentry->d_inode->u.generic_ip)->data;
+
+    /* NOTE(review): copy_from_user's return value is ignored - confirm. */
+    copy_from_user(&addr, (unsigned long *)buff, sizeof(addr));
+
+    if(direct_disc_unmap(addr, mem_data->pfn, mem_data->tot_pages) == 0){
+        return sizeof(addr);
+    } else {
+        return -1;
+    }
+}
+
+/* proc read handler for "mem": direct-maps the domain's memory (contiguous
+ * variant) into the calling process and returns the resulting user virtual
+ * address through the read buffer.
+ * CLEANUP: removed the commented-out discontiguous-mapping experiment that
+ * had been left in the body.
+ */
+static ssize_t dom_mem_read(struct file * file, char * buff, size_t size, loff_t * off)
+{
+    unsigned long addr;
+    pgprot_t prot;
+    proc_memdata_t * mem_data = (proc_memdata_t *)((struct proc_dir_entry *)file->f_dentry->d_inode->u.generic_ip)->data;
+
+    prot = PAGE_SHARED;
+
+    /* remap the range using xen specific routines.
+     * NOTE(review): direct_mmap errors and the copy_to_user result are not
+     * checked; a failure value is handed straight to userspace. */
+    addr = direct_mmap(mem_data->pfn << PAGE_SHIFT, mem_data->tot_pages << PAGE_SHIFT, prot, 0, 0);
+    copy_to_user((unsigned long *)buff, &addr, sizeof(addr));
+
+    return sizeof(addr);
+
+}
-static int dom0_write_proc(struct file *file, const char *buffer,
+/* File operations for the per-domain "mem" proc entry. */
+struct file_operations dom_mem_ops = {
+    read: dom_mem_read,
+    write: dom_mem_write,
+};
+
+/* Create (or recreate) the "mem" proc entry under /proc/xeno/dom<dom>/,
+ * recording which pfn chain and how many pages it should expose.
+ * Returns 0 on success, -ENOENT if no proc dir exists for the domain,
+ * -EAGAIN if the proc entry could not be created.
+ */
+static int dom_map_mem(unsigned int dom, unsigned long pfn, int tot_pages)
+{
+    int ret = -ENOENT;
+    struct proc_dir_entry * pd = xeno_base->subdir;
+    struct proc_dir_entry * file;
+    proc_memdata_t * memdata;
+
+    /* walk /proc/xeno looking for this domain's directory */
+    while(pd != NULL){
+        if(((dom_procdata_t *)pd->data)->domain == dom){
+
+            /* check if there is already an entry for mem and if so
+             * remove it.
+             */
+            remove_proc_entry(DOM_MEM, pd);
+
+            /* create new entry with parameters describing what to do
+             * when it is mmaped.
+             */
+            file = create_proc_entry(DOM_MEM, 0600, pd);
+            if(file != NULL)
+            {
+                file->owner = THIS_MODULE;
+                file->nlink = 1;
+                file->proc_fops = &dom_mem_ops;
+
+                /* BUG FIX: kmalloc takes (size, flags) - the arguments were
+                 * swapped - and the allocation is now checked. */
+                memdata = (proc_memdata_t *)kmalloc(sizeof(proc_memdata_t), GFP_KERNEL);
+                if(memdata == NULL)
+                {
+                    remove_proc_entry(DOM_MEM, pd);
+                    ret = -EAGAIN;
+                    break;
+                }
+                memdata->pfn = pfn;
+                memdata->tot_pages = tot_pages;
+                file->data = memdata;
+
+                ret = 0;
+                break;
+            }
+            ret = -EAGAIN;
+            break;
+        }
+        pd = pd->next;
+    }
+
+    return ret;
+}
+
+/* Return the new domain's id (stashed in the entry's ->data pointer) to
+ * userspace. The entry is one-shot: it removes itself after the first read.
+ */
+static int dom_id_read_proc(char *page, char **start, off_t off,
+    int count, int *eof, void *data)
+{
+    char arg[16];
+    /* bounded format - the id always fits, but never trust sprintf */
+    snprintf(arg, sizeof(arg), "%d", (int)data);
+    strcpy(page, arg);
+    remove_proc_entry(DOM0_NEWDOM, xeno_base);
+    /* BUG FIX: a read_proc handler must report the number of bytes placed in
+     * the page, not sizeof(unsigned int). */
+    return strlen(page);
+}
+
+static int cmd_write_proc(struct file *file, const char *buffer,
u_long count, void *data)
{
dom0_op_t op;
- unsigned char c;
- unsigned int val;
- unsigned char result[20];
- int len = count, ret;
+ int ret = 0;
+ struct proc_dir_entry * new_dom_id;
+
+ copy_from_user(&op, buffer, sizeof(dom0_op_t));
- while ( count )
- {
- c = *buffer++;
- count--;
- val = 0;
- if ( c == 'N' )
- {
- op.cmd = DOM0_NEWDOMAIN;
- while ( count && ((c = *buffer) >= '0') && (c <= '9') )
- {
- val *= 10;
- val += c - '0';
- buffer++; count--;
- }
- op.u.newdomain.memory_kb = val;
- val = 0;
- if (count && (*buffer == ','))
- {
- buffer++; count--;
- while ( count && ((c = *buffer) >= '0') && (c <= '9') )
- {
- val *= 10;
- val += c - '0';
- buffer++; count--;
- }
- }
- else
+ /* do some sanity checks */
+ if(op.cmd > MAX_CMD){
+ ret = -ENOSYS;
+ goto out;
+ }
+
+ /* is the request intended for hypervisor? */
+ if(op.cmd != MAP_DOM_MEM){
+ ret = HYPERVISOR_dom0_op(&op);
+
+ /* if new domain created, create proc entries */
+ if(op.cmd == DOM0_NEWDOMAIN){
+ create_proc_dom_entries(ret);
+
+ /* now notify user space of the new domain's id */
+ new_dom_id = create_proc_entry(DOM0_NEWDOM, 0600, xeno_base);
+ if ( new_dom_id != NULL )
{
- val = 1; // default to 1 vif.
+ new_dom_id->owner = THIS_MODULE;
+ new_dom_id->nlink = 1;
+ new_dom_id->read_proc = dom_id_read_proc;
+ new_dom_id->data = (void *)ret;
}
- op.u.newdomain.num_vifs = val;
- ret = HYPERVISOR_dom0_op(&op);
- }
- else if ( c == 'K' )
- {
- op.cmd = DOM0_KILLDOMAIN;
- while ( count && ((c = *buffer) >= '0') && (c <= '9') )
- {
- val *= 10;
- val += c - '0';
- buffer++; count--;
- }
- op.u.killdomain.domain = val;
- ret = HYPERVISOR_dom0_op(&op);
- }
- else
- {
- ret = -ENOSYS;
+
}
-
- sprintf(result, "%d\n", ret);
- strcat(readbuf, result);
- while ( count-- && (*buffer++ != '\n') ) continue;
+ } else {
+
+ ret = dom_map_mem(op.u.reqdommem.domain, op.u.reqdommem.start_pfn,
+ op.u.reqdommem.tot_pages);
}
+
+out:
+ return ret;
+
+}
- return len;
+/* proc write handler for "frame_table": userspace hands back the address it
+ * got from ft_read and the frame-table direct mapping is removed.
+ * Returns sizeof(addr) on success, -1 if the mapping was not found.
+ */
+static ssize_t ft_write(struct file * file, const char * buff, size_t size , loff_t * off)
+{
+    unsigned long addr;
+
+    /* NOTE(review): copy_from_user's return value is ignored - confirm. */
+    copy_from_user(&addr, (unsigned long *)buff, sizeof(addr));
+
+    if(direct_unmap(addr, frame_table_len) == 0){
+        return sizeof(addr);
+    } else {
+        return -1;
+    }
+}
+/* proc read handler for "frame_table": direct-maps the hypervisor frame
+ * table (physical address taken from start_info at module init) into the
+ * calling process and returns the user virtual address via the read buffer.
+ */
+static ssize_t ft_read(struct file * file, char * buff, size_t size, loff_t * off)
+{
+    unsigned long addr;
+    pgprot_t prot;
+
+    prot = PAGE_SHARED;
+
+    /* remap the range using xen specific routines.
+     * NOTE(review): direct_mmap errors and the copy_to_user result are not
+     * checked; a failure value is handed straight to userspace. */
+    addr = direct_mmap(frame_table_pa, frame_table_len, prot, 0, 0);
+    copy_to_user((unsigned long *)buff, &addr, sizeof(addr));
+
+    return sizeof(addr);
+
+}
+
+/* File operations for the "frame_table" proc entry: read maps, write unmaps. */
+struct file_operations ft_ops = {
+    read: ft_read,
+    write: ft_write,
+};
static int __init init_module(void)
{
+
+ frame_table = (frame_table_t *)start_info.frame_table;
+ frame_table_len = start_info.frame_table_len;
+ frame_table_pa = start_info.frame_table_pa;
+
+ /* xeno proc root setup */
+ xeno_base = proc_mkdir(XENO_BASE, &proc_root);
+
+ /* xeno control interface */
*readbuf = '\0';
- proc_dom0 = create_proc_entry ("dom0", 0600, &proc_root);
- if ( proc_dom0 != NULL )
+ dom0_cmd_intf = create_proc_entry (DOM0_CMD_INTF, 0600, xeno_base);
+ if ( dom0_cmd_intf != NULL )
{
- proc_dom0->owner = THIS_MODULE;
- proc_dom0->nlink = 1;
- proc_dom0->read_proc = dom0_read_proc;
- proc_dom0->write_proc = dom0_write_proc;
- printk("Successfully installed domain-0 control interface\n");
+ dom0_cmd_intf->owner = THIS_MODULE;
+ dom0_cmd_intf->nlink = 1;
+ dom0_cmd_intf->read_proc = cmd_read_proc;
+ dom0_cmd_intf->write_proc = cmd_write_proc;
}
+
+ /* frame table mapping, to be mmaped */
+ proc_ft = create_proc_entry(DOM0_FT, 0600, xeno_base);
+ if(proc_ft != NULL)
+ {
+ proc_ft->owner = THIS_MODULE;
+ proc_ft->nlink = 1;
+ proc_ft->proc_fops = &ft_ops;
+ }
+
+ /* set up /proc entries for dom 0 */
+ create_proc_dom_entries(0);
+
return 0;
}
static void __exit cleanup_module(void)
{
- if ( proc_dom0 == NULL ) return;
+ if ( dom0_cmd_intf == NULL ) return;
remove_proc_entry("dom0", &proc_root);
- proc_dom0 = NULL;
+ dom0_cmd_intf = NULL;
}
--- /dev/null
+
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/swap.h>
+#include <linux/smp_lock.h>
+#include <linux/swapctl.h>
+#include <linux/iobuf.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/list.h>
+
+#include <asm/pgalloc.h>
+#include <asm/uaccess.h>
+#include <asm/tlb.h>
+#include <asm/mmu.h>
+
+#include "hypervisor_defs.h"
+
+#define MAP_CONT 0
+#define MAP_DISCONT 1
+
+/*
+ * maps a range of physical memory into the requested pages. the old
+ * mappings are removed. any references to nonexistent pages results
+ * in null mappings (currently treated as "copy-on-access")
+ */
+
+/* bd240: functions below perform direct mapping to the real physical pages needed for
+ * mapping various hypervisor specific structures needed in dom0 userspace by various
+ * management applications such as domain builder etc.
+ */
+
+#define direct_set_pte(pteptr, pteval) queue_l1_entry_update(__pa(pteptr) | PGREQ_UNCHECKED_UPDATE, (pteval).pte_low)
+
+#define direct_pte_clear(pteptr) queue_l1_entry_update(__pa(pteptr) | PGREQ_UNCHECKED_UPDATE, 0)
+
+#define __direct_pte(x) ((pte_t) { (x) } )
+#define __direct_mk_pte(page_nr,pgprot) __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
+#define direct_mk_pte_phys(physpage, pgprot) __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
+
+/* Sanity check: a direct mapping must never silently replace a live pte.
+ * A non-empty old entry indicates state corruption, so stop the kernel.
+ */
+static inline void forget_pte(pte_t page)
+{
+    if (pte_none(page))
+        return;
+    printk("forget_pte: old mapping existed!\n");
+    BUG();
+}
+
+/* Fill one pte page with direct physical mappings for [address, address+size),
+ * clamped to the current pmd entry. Each pte write is queued to the
+ * hypervisor via direct_set_pte; any pre-existing mapping is a bug (see
+ * forget_pte).
+ */
+static inline void direct_remappte_range(pte_t * pte, unsigned long address, unsigned long size,
+    unsigned long phys_addr, pgprot_t prot)
+{
+    unsigned long end;
+
+    address &= ~PMD_MASK;
+    end = address + size;
+    if (end > PMD_SIZE)
+        end = PMD_SIZE;
+    do {
+        pte_t oldpage;
+        oldpage = ptep_get_and_clear(pte);
+
+        /* CLEANUP: removed leftover "bd240 debug" printk(KERN_ALERT ...)
+         * that logged every single page mapped. */
+        direct_set_pte(pte, direct_mk_pte_phys(phys_addr, prot));
+
+        forget_pte(oldpage);
+        address += PAGE_SIZE;
+        phys_addr += PAGE_SIZE;
+        pte++;
+    } while (address && (address < end));
+}
+
+/* Walk/allocate the pte pages under one pgd entry and hand each sub-range to
+ * direct_remappte_range. phys_addr is pre-biased by -address so that
+ * (address + phys_addr) is always the physical target for `address`.
+ * Returns 0 or -ENOMEM.
+ */
+static inline int direct_remappmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
+    unsigned long phys_addr, pgprot_t prot)
+{
+    unsigned long end;
+
+    address &= ~PGDIR_MASK;
+    end = address + size;
+    if (end > PGDIR_SIZE)
+        end = PGDIR_SIZE;
+    /* bias so the addition inside the loop yields the right frame */
+    phys_addr -= address;
+    do {
+        pte_t * pte = pte_alloc(mm, pmd, address);
+        if (!pte)
+            return -ENOMEM;
+        direct_remappte_range(pte, address, end - address, address + phys_addr, prot);
+        address = (address + PMD_SIZE) & PMD_MASK;
+        pmd++;
+    } while (address && (address < end));
+    return 0;
+}
+
+/* Note: this is only safe if the mm semaphore is held when called. */
+/* Direct analogue of remap_page_range(): installs a linear mapping of
+ * [phys_addr, phys_addr+size) at user address `from` in the current mm,
+ * with pte writes queued to the hypervisor. Returns 0 or -ENOMEM.
+ */
+int direct_remap_page_range(unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
+{
+    int error = 0;
+    pgd_t * dir;
+    unsigned long beg = from;
+    unsigned long end = from + size;
+    struct mm_struct *mm = current->mm;
+
+    /* bias so that (phys_addr + from) below is the physical target */
+    phys_addr -= from;
+    dir = pgd_offset(mm, from);
+    flush_cache_range(mm, beg, end);
+    if (from >= end)
+        BUG();
+
+    spin_lock(&mm->page_table_lock);
+    do {
+        pmd_t *pmd = pmd_alloc(mm, dir, from);
+        error = -ENOMEM;
+        if (!pmd)
+            break;
+        error = direct_remappmd_range(mm, pmd, from, end - from, phys_addr + from, prot);
+        if (error)
+            break;
+        from = (from + PGDIR_SIZE) & PGDIR_MASK;
+        dir++;
+    } while (from && (from < end));
+    spin_unlock(&mm->page_table_lock);
+    flush_tlb_range(mm, beg, end);
+    return error;
+}
+
+/*
+ * used for remapping discontiguous bits of domain's memory, pages to map are
+ * found from frame table beginning at the given first_pg index
+ */
+/* Map tot_pages discontiguous domain pages at `from`, following the chain of
+ * page indices threaded through the frame table starting at first_pg (each
+ * entry's ->next names the following pfn). Returns the number of pages NOT
+ * mapped, i.e. 0 on full success.
+ */
+int direct_remap_disc_page_range(unsigned long from, unsigned long first_pg,
+    int tot_pages, pgprot_t prot)
+{
+    frame_table_t * current_ft;
+    unsigned long current_pfn;
+    unsigned long start = from;
+    int count = 0;
+
+    current_ft = (frame_table_t *)(frame_table + first_pg);
+    current_pfn = first_pg;
+    while(count < tot_pages){
+        /* one page at a time: each pfn in the chain may be anywhere */
+        if(direct_remap_page_range(start, current_pfn << PAGE_SHIFT, PAGE_SIZE, prot))
+            goto out;
+        start += PAGE_SIZE;
+        current_pfn = current_ft->next;
+        current_ft = (frame_table_t *)(frame_table + current_pfn);
+        count++;
+    }
+
+out:
+
+    return tot_pages - count;
+}
+
+/* below functions replace standard sys_mmap and sys_munmap which are absolutely useless
+ * for direct memory mapping. direct_zap* functions are minor amendments to the
+ * original versions in mm/memory.c. the changes are to enable unmapping of real physical
+ * addresses.
+ */
+
+/* Map a physical range directly into the current process' address space and
+ * record it on the mm's direct_list so direct_unmap can find it later.
+ * flag selects contiguous (MAP_CONT) vs frame-table-chained discontiguous
+ * (MAP_DISCONT) mapping; tot_pages is used only in the discontiguous case.
+ * Returns the chosen user virtual address, or a negative errno.
+ */
+unsigned long direct_mmap(unsigned long phys_addr, unsigned long size,
+    pgprot_t prot, int flag, int tot_pages)
+{
+    direct_mmap_node_t * dmmap;
+    unsigned long addr;
+    int ret = 0;
+
+    /* direct physical mappings are privileged */
+    if(!capable(CAP_SYS_ADMIN)){
+        ret = -EPERM;
+        goto out;
+    }
+
+    /* get unmapped area invokes xen specific arch_get_unmapped_area */
+    addr = get_unmapped_area(NULL, 0, size, 0, 0);
+    if(addr & ~PAGE_MASK){
+        ret = -ENOMEM;
+        goto out;
+    }
+
+    /* add node on the list of directly mapped areas.
+     * BUG FIX: kmalloc takes (size, flags) - the arguments were swapped -
+     * the allocation is now checked, and the "&current" expression that had
+     * been mangled into "¤t" by an encoding error is repaired. */
+    dmmap = (direct_mmap_node_t *)kmalloc(sizeof(direct_mmap_node_t), GFP_KERNEL);
+    if(dmmap == NULL){
+        ret = -ENOMEM;
+        goto out;
+    }
+    dmmap->addr = addr;
+    list_add(&dmmap->list, &current->mm->context.direct_list);
+
+    /* and perform the mapping */
+    if(flag == MAP_DISCONT){
+        ret = direct_remap_disc_page_range(addr, phys_addr, tot_pages, prot);
+    } else {
+        ret = direct_remap_page_range(addr, phys_addr, size, prot);
+    }
+
+    if(ret == 0)
+        ret = addr;
+
+out:
+    return ret;
+}
+
+/* most of the checks, refcnt updates, cache stuff have been thrown out as they are not
+ * needed
+ */
+/* Clear the ptes covering [address, address+size) within one pmd entry,
+ * queueing the clears to the hypervisor. Refcount/cache bookkeeping from the
+ * stock zap_pte_range has been stripped: these ptes reference raw physical
+ * frames, not managed pages. Returns the number of entries freed.
+ */
+static inline int direct_zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address,
+    unsigned long size)
+{
+    unsigned long offset;
+    pte_t * ptep;
+    int freed = 0;
+
+    if (pmd_none(*pmd))
+        return 0;
+    if (pmd_bad(*pmd)) {
+        pmd_ERROR(*pmd);
+        pmd_clear(pmd);
+        return 0;
+    }
+    ptep = pte_offset(pmd, address);
+    offset = address & ~PMD_MASK;
+    if (offset + size > PMD_SIZE)
+        size = PMD_SIZE - offset;
+    size &= PAGE_MASK;
+    for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
+        pte_t pte = *ptep;
+        if (pte_none(pte))
+            continue;
+        freed ++;
+        direct_pte_clear(ptep);
+    }
+
+    return freed;
+}
+
+/* Clear all direct-mapped ptes under one pgd entry for [address,
+ * address+size), clamped to the pgd boundary. Returns the total number of
+ * entries freed by direct_zap_pte_range.
+ */
+static inline int direct_zap_pmd_range(mmu_gather_t *tlb, pgd_t * dir,
+    unsigned long address, unsigned long size)
+{
+    pmd_t * pmd;
+    unsigned long end;
+    int freed;
+
+    if (pgd_none(*dir))
+        return 0;
+    if (pgd_bad(*dir)) {
+        pgd_ERROR(*dir);
+        pgd_clear(dir);
+        return 0;
+    }
+    pmd = pmd_offset(dir, address);
+    end = address + size;
+    /* never walk past the end of this pgd entry */
+    if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
+        end = ((address + PGDIR_SIZE) & PGDIR_MASK);
+    freed = 0;
+    do {
+        freed += direct_zap_pte_range(tlb, pmd, address, end - address);
+        address = (address + PMD_SIZE) & PMD_MASK;
+        pmd++;
+    } while (address < end);
+    return freed;
+}
+
+/*
+ * remove user pages in a given range.
+ */
+/*
+ * remove user pages in a given range. Direct-mapping variant of
+ * zap_page_range: clears ptes via the hypervisor queue and skips the rss
+ * accounting, since the frames were never owned by this mm.
+ */
+void direct_zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
+{
+    mmu_gather_t *tlb;
+    pgd_t * dir;
+    unsigned long start = address, end = address + size;
+    int freed = 0;
+
+    dir = pgd_offset(mm, address);
+
+    /*
+     * This is a long-lived spinlock. That's fine.
+     * There's no contention, because the page table
+     * lock only protects against kswapd anyway, and
+     * even if kswapd happened to be looking at this
+     * process we _want_ it to get stuck.
+     */
+    if (address >= end)
+        BUG();
+    spin_lock(&mm->page_table_lock);
+    flush_cache_range(mm, address, end);
+    tlb = tlb_gather_mmu(mm);
+
+    do {
+        freed += direct_zap_pmd_range(tlb, dir, address, end - address);
+        address = (address + PGDIR_SIZE) & PGDIR_MASK;
+        dir++;
+    } while (address && (address < end));
+
+    /* this will flush any remaining tlb entries */
+    tlb_finish_mmu(tlb, start, end);
+
+    /* decrementing rss removed */
+
+    spin_unlock(&mm->page_table_lock);
+}
+
+/* Tear down a direct mapping previously created by direct_mmap.
+ * Returns 0 on success, -1 if addr is not on the mm's direct list.
+ * BUG FIX: "&current" had been mangled into "¤t" by a bad encoding
+ * conversion, which does not compile.
+ */
+int direct_unmap(unsigned long addr, unsigned long size)
+{
+    direct_mmap_node_t * node;
+    struct list_head * curr;
+    struct list_head * direct_list = &current->mm->context.direct_list;
+
+    /* find the tracking node for this mapping */
+    curr = direct_list->next;
+    while(curr != direct_list){
+        node = list_entry(curr, direct_mmap_node_t, list);
+        if(node->addr == addr)
+            break;
+        curr = curr->next;
+    }
+
+    if(curr == direct_list)
+        return -1;
+
+    list_del(&node->list);
+    kfree(node);
+
+    direct_zap_page_range(current->mm, addr, size);
+
+    return 0;
+}
+
+/* Tear down a discontiguous direct mapping of tot_pages pages starting at
+ * user address `from`. Returns 0 on success, -1 if the mapping is not on the
+ * mm's direct list.
+ * BUG FIX: "&current" had been mangled into "¤t" by a bad encoding
+ * conversion, which does not compile.
+ */
+int direct_disc_unmap(unsigned long from, unsigned long first_pg, int tot_pages)
+{
+    int count = 0;
+    direct_mmap_node_t * node;
+    struct list_head * curr;
+    struct list_head * direct_list = &current->mm->context.direct_list;
+
+    /* find the tracking node for this mapping */
+    curr = direct_list->next;
+    while(curr != direct_list){
+        node = list_entry(curr, direct_mmap_node_t, list);
+        if(node->addr == from)
+            break;
+        curr = curr->next;
+    }
+
+    if(curr == direct_list)
+        return -1;
+
+    list_del(&node->list);
+    kfree(node);
+
+    /* zap one page at a time - the backing frames are not contiguous */
+    while(count < tot_pages){
+        direct_zap_page_range(current->mm, from, PAGE_SIZE);
+        from += PAGE_SIZE;
+        count++;
+    }
+
+    return 0;
+}
+
/******************************************************************************
* dom0_ops.h
*
* Process command requests from domain-0 guest OS.
*
- * Copyright (c) 2002, K A Fraser
+ * Copyright (c) 2002, K A Fraser, B Dragovic
*/
#define DOM0_NEWDOMAIN 0
#define DOM0_KILLDOMAIN 1
+#define DOM0_MAPTASK 2
+#define MAP_DOM_MEM 3
+#define DOM0_STARTDOM 4
+#define MAX_CMD 4
+
+#define MAX_CMD_LEN 256
typedef struct dom0_newdomain_st
{
unsigned int memory_kb;
- unsigned int num_vifs;
+ unsigned int num_vifs; // temporary
+ unsigned int domain; // return parameter
} dom0_newdomain_t;
typedef struct dom0_killdomain_st
unsigned int domain;
} dom0_killdomain_t;
+/* DOM0_MAPTASK: ask the hypervisor to map another domain's task struct
+ * (at physical address ts_phy_addr) into DOM0's address space. */
+typedef struct dom0_map_ts
+{
+ unsigned int domain;
+ unsigned long ts_phy_addr;
+} dom0_tsmap_t;
+
+/* MAP_DOM_MEM: request access to tot_pages of a domain's memory,
+ * starting at page frame start_pfn. */
+typedef struct dom_mem_req
+{
+ unsigned int domain;
+ unsigned long start_pfn;
+ int tot_pages;
+} dom_mem_req_t;
+
+/* DOM0_STARTDOM: everything needed to launch a built domain image.
+ * All virt_* fields are addresses in the new domain's virtual space;
+ * cmd_line is the guest kernel command line (max MAX_CMD_LEN bytes,
+ * including the terminating NUL). */
+typedef struct domain_launch
+{
+ unsigned int domain;
+ unsigned long l2_pgt_addr; /* top-level (l2) page table */
+ unsigned long virt_load_addr; /* where the image is loaded */
+ unsigned long virt_shinfo_addr; /* shared-info page */
+ unsigned long virt_startinfo_addr; /* start-info page */
+ char cmd_line[MAX_CMD_LEN];
+} dom_meminfo_t;
+
+
typedef struct dom0_op_st
{
unsigned long cmd;
{
dom0_newdomain_t newdomain;
dom0_killdomain_t killdomain;
+ dom0_tsmap_t mapdomts;
+ dom_mem_req_t reqdommem;
}
u;
} dom0_op_t;
+
--- /dev/null
+
+/******************************************************************************
+ * dom0_ops.h
+ *
+ * Data structures defined in hypervisor code but needed in DOM0 as well.
+ * Contents of this file should be kept in sync with the hypervisor ones
+ * unless you do not want something terrible :) to happen.
+ *
+ * Copyright (c) 2002, Keir Fraser & Boris Dragovic
+ */
+
+
+/* original version: xen-2.4.16/include/xeno/mm.h */
+/* Per-machine-frame bookkeeping, one entry per page of physical memory.
+ * NOTE(review): the struct tag is pfn_info but the typedef name is
+ * frame_table_t — the name really describes the array (frame_table
+ * below), not a single entry.  Kept as-is for hypervisor ABI sync. */
+typedef struct pfn_info {
+ struct list_head list; /* ->mapping has some page lists. */
+ unsigned long next; /* used for threading pages belonging */
+ unsigned long prev; /* to same domain */
+ unsigned long flags; /* atomic flags. */
+ unsigned long tot_count; /* Total domain usage count. */
+ unsigned long type_count; /* pagetable/dir, or domain-writeable refs. */
+} frame_table_t;
+
+/* Defined by the hypervisor; one pfn_info per physical page frame. */
+extern frame_table_t * frame_table;
+
+/* Per-domain data exported through /proc: owning domain id and the
+ * size of its mapped region. */
+typedef struct proc_data {
+ unsigned int domain;
+ unsigned long map_size;
+} dom_procdata_t;
+
+/* Memory-range descriptor exported through /proc: first page frame
+ * number and total page count. */
+typedef struct proc_mem_data {
+ unsigned long pfn;
+ int tot_pages;
+} proc_memdata_t;
+
+
O_TARGET := mm.o
-obj-y := init.o fault.o extable.o hypervisor.o
+obj-y := init.o fault.o extable.o hypervisor.o get_unmapped_area.o mmu_context.o
include $(TOPDIR)/Rules.make
--- /dev/null
+
+#include <linux/slab.h>
+#include <linux/shm.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <linux/swapctl.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/personality.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+
+/*
+ * Return 1 if 'addr' is the start address of a region this mm has
+ * direct-mapped (i.e. a node with exactly that address exists on
+ * current->mm->context.direct_list), 0 otherwise.
+ *
+ * NOTE(review): the original line read "¤t->mm" — a mangled HTML
+ * entity for "&current->mm"; restored here.
+ */
+static int direct_mapped(unsigned long addr)
+{
+    direct_mmap_node_t * node;
+    struct list_head * curr;
+    struct list_head * direct_list = &current->mm->context.direct_list;
+
+    /* now, this loop is going to make things slow, maybe should think
+     * of a better way to implement it, maybe without list_head
+     */
+    curr = direct_list->next;
+    while(curr != direct_list){
+        node = list_entry(curr, direct_mmap_node_t, list);
+        if(node->addr == addr)
+            break;
+        curr = curr->next;
+    }
+
+    if(curr == direct_list)
+        return 0;
+
+    return 1;
+}
+
+/*
+ * Find a free region of 'len' bytes in the current mm's address space,
+ * skipping any region that has been direct-mapped (the only difference
+ * to the generic arch_get_unmapped_area).  'filp', 'pgoff' and 'flags'
+ * are unused here but required by the HAVE_ARCH_UNMAPPED_AREA hook.
+ *
+ * Returns the chosen address, or -ENOMEM if no suitable hole exists.
+ *
+ * BUGFIX(review): the original fell through to "addr = vma->vm_end"
+ * (and "vma = vma->vm_next" in the for-increment) even when vma was
+ * NULL — reachable when the hole above all vmas is direct-mapped —
+ * dereferencing a NULL pointer.  The loop below advances past a
+ * direct-mapped hole page by page instead.
+ */
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+    struct vm_area_struct *vma;
+
+    if (len > TASK_SIZE)
+        return -ENOMEM;
+
+    /* Honour a caller-supplied hint if the hole there is big enough. */
+    if (addr) {
+        addr = PAGE_ALIGN(addr);
+        vma = find_vma(current->mm, addr);
+        if (TASK_SIZE - len >= addr &&
+            (!vma || addr + len <= vma->vm_start))
+            return addr;
+    }
+    addr = PAGE_ALIGN(TASK_UNMAPPED_BASE);
+
+    vma = find_vma(current->mm, addr);
+    for (;;) {
+        /* At this point: (!vma || addr < vma->vm_end). */
+        if (TASK_SIZE - len < addr)
+            return -ENOMEM;
+
+        /* The hole must be big enough AND not already direct-mapped. */
+        if ((!vma || addr + len <= vma->vm_start) && !direct_mapped(addr))
+            return addr;
+
+        if (!vma) {
+            /* Above the last vma but 'addr' is direct-mapped: probe the
+             * next page rather than dereferencing a NULL vma. */
+            addr += PAGE_SIZE;
+        } else {
+            addr = vma->vm_end;
+            vma = vma->vm_next;
+        }
+    }
+}
--- /dev/null
+
+#include <linux/slab.h>
+#include <linux/list.h>
+
+/* Arch hook invoked when a new mm is created: start with an empty list
+ * of direct-mapped regions.  Always succeeds (returns 0).  'tsk' is
+ * unused but required by the init_new_context() interface. */
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ INIT_LIST_HEAD(&mm->context.direct_list);
+ return 0;
+}
+
+/*
+ * Arch hook invoked when an mm is torn down: release every bookkeeping
+ * node on the direct-mapped-region list.  The page tables themselves
+ * are not touched here.
+ */
+void destroy_context(struct mm_struct *mm)
+{
+    struct list_head *head = &mm->context.direct_list;
+    struct list_head *pos = head->next;
+
+    while (pos != head) {
+        direct_mmap_node_t *entry = list_entry(pos, direct_mmap_node_t, list);
+        /* Advance before freeing: list_del()/kfree() invalidate 'pos'. */
+        pos = pos->next;
+        list_del(&entry->list);
+        kfree(entry);
+    }
+}
+
#ifndef __i386_MMU_H
#define __i386_MMU_H
+#include <linux/list.h>
+
+/* Describes one directly mapped region: a node on the per-mm
+ * context.direct_list, keyed by the region's start address. */
+typedef struct {
+ struct list_head list;
+ unsigned long addr; /* start address of the direct-mapped region */
+} direct_mmap_node_t;
+
/*
* The i386 doesn't have a mmu context, but
* we put the segment information here.
typedef struct {
void *segments;
unsigned long cpuvalid;
+ struct list_head direct_list;
} mm_context_t;
#endif
+
#ifndef __I386_MMU_CONTEXT_H
#define __I386_MMU_CONTEXT_H
/*
* possibly do the LDT unload here?
*/
-#define destroy_context(mm) do { } while(0)
-#define init_new_context(tsk,mm) 0
+
+/* Real functions (arch/xeno/mm/mmu_context.c) replace the old no-op
+ * macros: each mm now carries a list of direct-mapped regions that
+ * must be initialised on creation and freed on teardown. */
+extern int init_new_context(struct task_struct *tsk, struct mm_struct *);
+extern void destroy_context(struct mm_struct *);
+
#ifdef CONFIG_SMP
#include <linux/config.h>
+#define HAVE_ARCH_UNMAPPED_AREA
+
/*
* The Linux memory management assumes a three-level page table setup. On
* the i386, we use that, but "fold" the mid level into the top-level page